void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
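+/*
+ * Remove a broken page's gfn -> mfn mapping from an HVM guest so the
+ * guest cannot access it again; returns 0 on success, negative on error.
+ */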
+int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
u64 mce_cap_init(void);
extern int firstbank;
BUG_ON( result->owner == DOMID_COW );
if ( result->owner != DOMID_XEN ) {
d = get_domain_by_id(result->owner);
+ ASSERT(d);
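+ /* Translate the broken machine frame back to the guest frame via the M2P */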
+ gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
+
if ( !is_vmce_ready(bank, d) )
{
- /* Should not inject vMCE to guest */
- if ( d )
- put_domain(d);
- return;
+ printk("DOM%d not ready for vMCE\n", d->domain_id);
+ goto vmce_failed;
+ }
+
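+ /* Tear down the guest mapping so the broken page cannot be re-accessed */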
+ if ( unmmap_broken_page(d, _mfn(mfn), gfn) )
+ {
+ printk("Unmap broken memory %lx for DOM%d failed\n",
+ mfn, d->domain_id);
+ goto vmce_failed;
}
- ASSERT(d);
- gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
bank->mc_addr = (gfn << PAGE_SHIFT) |
(bank->mc_addr & (PAGE_SIZE - 1));
if ( fill_vmsr_data(bank, d, global->mc_gstatus) == -1 )
{
mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
"failed\n", result->owner);
- put_domain(d);
- domain_crash(d);
- return;
+ goto vmce_failed;
}
+
/* We will inject vMCE to DomU */
if ( inject_vmce(d) < 0 )
{
mce_printk(MCE_QUIET, "inject vMCE to DOM%d"
" failed\n", d->domain_id);
- put_domain(d);
- domain_crash(d);
- return;
+ goto vmce_failed;
}
/* The impacted domain goes on with its own recovery job
* if it has its own MCA handler.
*/
result->result = MCA_RECOVERED;
put_domain(d);
+
+ return;
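+
+ /* Common failure path: drop the domain reference and crash the domain */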
+vmce_failed:
+ put_domain(d);
+ domain_crash(d);
}
}
}
return 0;
}
+
+/* Some RAM may be set up as mmio_direct to get the UC cache attribute, so include that type as well */
+#define P2M_UNMAP_TYPES (p2m_to_mask(p2m_ram_rw) \
+ | p2m_to_mask(p2m_ram_logdirty) \
+ | p2m_to_mask(p2m_ram_ro) \
+ | p2m_to_mask(p2m_mmio_direct))
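+
+/*
+ * Worked example of the mask test below, assuming Xen's usual
+ * p2m_to_mask(t) == (1UL << t) encoding: each p2m type contributes a
+ * single bit, so (p2m_to_mask(p2m_ram_rw) & P2M_UNMAP_TYPES) is
+ * non-zero and the page gets unmapped, while a grant- or foreign-mapped
+ * type tests to zero and the page is left alone.
+ */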
+
+/*
+ * Currently all CPUs are rendezvoused at the MCE softirq handler, so
+ * there is no need to consider paging p2m types here.
+ * Only HVM guests using EPT (HAP) paging mode are supported for now.
+ * XXX the following cases are not handled yet:
+ * PoD, foreign mapped, granted, shared pages.
+ */
+int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
+{
+ mfn_t r_mfn;
+ struct p2m_domain *p2m;
+ p2m_type_t pt;
+
+ /* Always trust that dom0's MCE handler will prevent future access */
+ if ( d == dom0 )
+ return 0;
+
+ if ( !mfn_valid(mfn_x(mfn)) )
+ return -EINVAL;
+
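+ /* Per the comment above, only HVM guests with EPT (HAP) are supported */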
+ if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+ return -ENOSYS;
+
+ p2m = p2m_get_hostp2m(d);
+ ASSERT(p2m);
+
+ /* This only happens for PoD memory, which should be handled separately */
+ if ( gfn > p2m->max_mapped_pfn )
+ return -EINVAL;
+
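+ /* Query-only lookup: must not itself populate a PoD entry */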
+ r_mfn = gfn_to_mfn_query(p2m, gfn, &pt);
+ if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES )
+ {
+ ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
+ p2m_change_type(p2m, gfn, pt, p2m_ram_broken);
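+ /* Guest accesses to this gfn now fault to Xen instead of touching
+ * the poisoned frame */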
+ return 0;
+ }
+
+ /* The page's p2m type is not one we can safely unmap */
+ return -EINVAL;
+}
+